More x86/64 stuff.
Signed-off-by: keir.fraser@cl.cam.ac.uk
3ddb79c3M2n1ROZH6xk3HbyN4CPDqg xen/include/asm-x86/x86_32/uaccess.h
41bf1717bML6GxpclTWJabiaO5W5vg xen/include/asm-x86/x86_64/asm_defns.h
404f1b9ceJeGVaPNIENm2FkK0AgEOQ xen/include/asm-x86/x86_64/current.h
-404f1b9fl6AQ_a-T1TDK3fuwTPXmHw xen/include/asm-x86/x86_64/desc.h
41febc4b1aCGLsm0Y0b_82h7lFtrEA xen/include/asm-x86/x86_64/domain_page.h
404f1badfXZJZ2sU8sh9PS2EZvd19Q xen/include/asm-x86/x86_64/ldt.h
404f1bb86rAXB3aLS1vYdcqpJiEcyg xen/include/asm-x86/x86_64/regs.h
/* Install relocated selectors (FS/GS unused). */
lgdt gdt_descr(%rip)
- mov $(__HYPERVISOR_DS),%ecx
- mov %ecx,%ds
- mov %ecx,%es
- mov %ecx,%ss
/* Enable full CR4 features. */
mov mmu_cr4_features(%rip),%rcx
.asciz "Unknown interrupt\n"
ignore_int:
cld
- mov $(__HYPERVISOR_DS),%eax
- mov %eax,%ds
- mov %eax,%es
lea int_msg(%rip),%rdi
call SYMBOL_NAME(printf)
1: jmp 1b
/******************************************************************************
- * arch/i386/traps.c
+ * arch/x86/traps.c
*
* Modifications to Linux original are copyright (c) 2002-2004, K A Fraser
*
#endif
string_param("nmi", opt_nmi);
-#if defined(__i386__)
-
#define GUEST_FAULT(_r) (likely(VM86_MODE(_r) || !RING_0(_r)))
#define DOUBLEFAULT_STACK_SIZE 1024
asmlinkage int hypercall(void);
/* Master table, and the one used by CPU0. */
-struct desc_struct idt_table[256] = { {0, 0}, };
+idt_entry_t idt_table[IDT_ENTRIES] = { {0, 0}, };
/* All other CPUs have their own copy. */
-struct desc_struct *idt_tables[NR_CPUS] = { 0 };
+idt_entry_t *idt_tables[NR_CPUS] = { 0 };
asmlinkage void divide_error(void);
asmlinkage void debug(void);
}
-void show_guest_stack()
+void show_guest_stack(void)
{
int i;
execution_context_t *ec = get_execution_context();
gs = __HYPERVISOR_DS;
}
- printk("CPU: %d\nEIP: %04x:[<%08x>] \nEFLAGS: %08x\n",
+ printk("CPU: %d\nEIP: %04lx:[<%08lx>] \nEFLAGS: %08lx\n",
smp_processor_id(), 0xffff & regs->cs, regs->eip, regs->eflags);
- printk("eax: %08x ebx: %08x ecx: %08x edx: %08x\n",
+ printk("eax: %08lx ebx: %08lx ecx: %08lx edx: %08lx\n",
regs->eax, regs->ebx, regs->ecx, regs->edx);
- printk("esi: %08x edi: %08x ebp: %08x esp: %08lx\n",
+ printk("esi: %08lx edi: %08lx ebp: %08lx esp: %08lx\n",
regs->esi, regs->edi, regs->ebp, esp);
printk("ds: %04x es: %04x fs: %04x gs: %04x ss: %04x\n",
ds, es, fs, gs, ss);
if ( likely((fixup = search_exception_table(regs->eip)) != 0) )
{
- DPRINTK("Trap %d: %08x -> %08lx\n", trapnr, regs->eip, fixup);
+ DPRINTK("Trap %d: %08lx -> %08lx\n", trapnr, regs->eip, fixup);
regs->eip = fixup;
return 0;
}
{
perfc_incrc(copy_user_faults);
if ( !ed->mm.shadow_mode )
- DPRINTK("Page fault: %08x -> %08lx\n", regs->eip, fixup);
+ DPRINTK("Page fault: %08lx -> %08lx\n", regs->eip, fixup);
regs->eip = fixup;
return 0;
}
if ( likely((fixup = search_exception_table(regs->eip)) != 0) )
{
- DPRINTK("GPF (%04x): %08x -> %08lx\n",
+ DPRINTK("GPF (%04x): %08lx -> %08lx\n",
regs->error_code, regs->eip, fixup);
regs->eip = fixup;
return 0;
if ( (reg < 0) || (reg > 7) ) return -EINVAL;
return current->thread.debugreg[reg];
}
-
-#else
-
-asmlinkage void fatal_trap(int trapnr, struct xen_regs *regs)
-{
-}
-
-#endif /* __i386__ */
eip, error_code);
VMX_DBG_LOG(DBG_LEVEL_1,
- "eax=%x, ebx=%x, ecx=%x, edx=%x, esi=%x, edi=%x\n",
+ "eax=%lx, ebx=%lx, ecx=%lx, edx=%lx, esi=%lx, edi=%lx\n",
regs->eax, regs->ebx, regs->ecx, regs->edx, regs->esi, regs->edi);
/* Reflect it back into the guest */
__vmread(GUEST_EIP, &eip);
VMX_DBG_LOG(DBG_LEVEL_1,
- "do_cpuid: (eax) %x, (ebx) %x, (ecx) %x, (edx) %x, (esi) %x, (edi) %x\n", regs->eax, regs->ebx, regs->ecx, regs->edx, regs->esi, regs->edi);
+ "do_cpuid: (eax) %lx, (ebx) %lx, (ecx) %lx, (edx) %lx,"
+ " (esi) %lx, (edi) %lx\n",
+ regs->eax, regs->ebx, regs->ecx, regs->edx,
+ regs->esi, regs->edi);
cpuid(input, &eax, &ebx, &ecx, &edx);
static void vmx_dr_access (unsigned long exit_qualification, struct xen_regs *regs)
{
unsigned int reg;
- u32 *reg_p = 0;
+ unsigned long *reg_p = 0;
struct exec_domain *ed = current;
- u32 eip;
+ unsigned long eip;
__vmread(GUEST_EIP, &eip);
reg = exit_qualification & DEBUG_REG_ACCESS_NUM;
VMX_DBG_LOG(DBG_LEVEL_1,
- "vmx_dr_access : eip=%08x, reg=%d, exit_qualification = %lx\n",
+ "vmx_dr_access : eip=%lx, reg=%d, exit_qualification = %lx\n",
eip, reg, exit_qualification);
switch(exit_qualification & DEBUG_REG_ACCESS_REG) {
static inline void vmx_do_msr_read(struct xen_regs *regs)
{
- VMX_DBG_LOG(DBG_LEVEL_1, "vmx_do_msr_read: ecx=%x, eax=%x, edx=%x",
+ VMX_DBG_LOG(DBG_LEVEL_1, "vmx_do_msr_read: ecx=%lx, eax=%lx, edx=%lx",
regs->ecx, regs->eax, regs->edx);
rdmsr(regs->ecx, regs->eax, regs->edx);
- VMX_DBG_LOG(DBG_LEVEL_1, "vmx_do_msr_read returns: ecx=%x, eax=%x, edx=%x",
- regs->ecx, regs->eax, regs->edx);
+ VMX_DBG_LOG(DBG_LEVEL_1, "vmx_do_msr_read returns: "
+ "ecx=%lx, eax=%lx, edx=%lx",
+ regs->ecx, regs->eax, regs->edx);
}
/*
__vmread(EXIT_QUALIFICATION, &va);
__vmread(VM_EXIT_INTR_ERROR_CODE, &error_code);
VMX_DBG_LOG(DBG_LEVEL_VMMU,
- "eax=%x, ebx=%x, ecx=%x, edx=%x, esi=%x, edi=%x\n", regs.eax, regs.ebx, regs.ecx, regs.edx, regs.esi, regs.edi);
+ "eax=%lx, ebx=%lx, ecx=%lx, edx=%lx, esi=%lx, edi=%lx\n",
+ regs.eax, regs.ebx, regs.ecx, regs.edx, regs.esi,
+ regs.edi);
d->thread.arch_vmx.vmx_platform.mpci.inst_decoder_regs = &regs;
if (!(error = vmx_do_page_fault(va, error_code))) {
break;
case EXIT_REASON_MSR_WRITE:
__vmread(GUEST_EIP, &eip);
- VMX_DBG_LOG(DBG_LEVEL_1, "MSR_WRITE: eip=%08lx, eax=%08x, edx=%08x",
+ VMX_DBG_LOG(DBG_LEVEL_1, "MSR_WRITE: eip=%08lx, eax=%08lx, edx=%08lx",
eip, regs.eax, regs.edx);
/* just ignore this point */
__get_instruction_length(inst_len);
u32 disp32 = 0;
u8 *eip; /* ptr to instruction start */
u8 *pb, b; /* ptr into instr. / current instr. byte */
- unsigned int *pseg = NULL; /* segment for memory operand (NULL=default) */
+ unsigned long *pseg = NULL; /* segment for memory operand (NULL=default) */
/* WARNING: We only work for ring-3 segments. */
if ( unlikely(VM86_MODE(regs)) || unlikely(!RING_3(regs)) )
OFFSET(XREGS_rdx, struct xen_regs, rdx);
OFFSET(XREGS_rsi, struct xen_regs, rsi);
OFFSET(XREGS_rdi, struct xen_regs, rdi);
- OFFSET(XREGS_orig_rax, struct xen_regs, orig_rax);
+ OFFSET(XREGS_error_code, struct xen_regs, error_code);
+ OFFSET(XREGS_entry_vector, struct xen_regs, entry_vector);
OFFSET(XREGS_rip, struct xen_regs, rip);
OFFSET(XREGS_cs, struct xen_regs, cs);
OFFSET(XREGS_eflags, struct xen_regs, eflags);
#ifndef __ASSEMBLY__
struct desc_struct {
- unsigned long a,b;
+ u32 a, b;
};
+#if defined(__x86_64__)
+typedef struct {
+ u64 a, b;
+} idt_entry_t;
+#elif defined(__i386__)
+typedef struct desc_struct idt_entry_t;
+#endif
+
extern struct desc_struct gdt_table[];
-extern struct desc_struct *idt, *gdt;
+extern struct desc_struct *gdt;
+extern idt_entry_t *idt;
struct Xgt_desc_struct {
- unsigned short size;
- unsigned long address __attribute__((packed));
+ unsigned short size;
+ unsigned long address __attribute__((packed));
};
#define idt_descr (*(struct Xgt_desc_struct *)((char *)&idt - 2))
};
#define IDT_ENTRIES 256
-extern struct desc_struct idt_table[];
-extern struct desc_struct *idt_tables[];
+extern idt_entry_t idt_table[];
+extern idt_entry_t *idt_tables[];
#if defined(__i386__)
+#ifndef __X86_REGS_H__
+#define __X86_REGS_H__
+
#ifdef __x86_64__
#include <asm/x86_64/regs.h>
#else
#include <asm/x86_32/regs.h>
#endif
+
+enum EFLAGS {
+ EF_CF = 0x00000001,
+ EF_PF = 0x00000004,
+ EF_AF = 0x00000010,
+ EF_ZF = 0x00000040,
+ EF_SF = 0x00000080,
+ EF_TF = 0x00000100,
+ EF_IE = 0x00000200,
+ EF_DF = 0x00000400,
+ EF_OF = 0x00000800,
+ EF_IOPL = 0x00003000,
+ EF_IOPL_RING0 = 0x00000000,
+ EF_IOPL_RING1 = 0x00001000,
+ EF_IOPL_RING2 = 0x00002000,
+ EF_NT = 0x00004000, /* nested task */
+ EF_RF = 0x00010000, /* resume */
+ EF_VM = 0x00020000, /* virtual mode */
+ EF_AC = 0x00040000, /* alignment */
+ EF_VIF = 0x00080000, /* virtual interrupt */
+ EF_VIP = 0x00100000, /* virtual interrupt pending */
+ EF_ID = 0x00200000, /* id */
+};
+
+#endif /* __X86_REGS_H__ */
#include <asm/types.h>
+/* So that we can use 'l' modifier in printf-style format strings. */
+#define u32 unsigned long
+
struct xen_regs
{
/* All saved activations contain the following fields. */
u32 gs;
} __attribute__ ((packed));
-enum EFLAGS {
- EF_CF = 0x00000001,
- EF_PF = 0x00000004,
- EF_AF = 0x00000010,
- EF_ZF = 0x00000040,
- EF_SF = 0x00000080,
- EF_TF = 0x00000100,
- EF_IE = 0x00000200,
- EF_DF = 0x00000400,
- EF_OF = 0x00000800,
- EF_IOPL = 0x00003000,
- EF_IOPL_RING0 = 0x00000000,
- EF_IOPL_RING1 = 0x00001000,
- EF_IOPL_RING2 = 0x00002000,
- EF_NT = 0x00004000, /* nested task */
- EF_RF = 0x00010000, /* resume */
- EF_VM = 0x00020000, /* virtual mode */
- EF_AC = 0x00040000, /* alignment */
- EF_VIF = 0x00080000, /* virtual interrupt */
- EF_VIP = 0x00100000, /* virtual interrupt pending */
- EF_ID = 0x00200000, /* id */
-};
+#undef u32
#define VM86_MODE(_r) ((_r)->eflags & EF_VM)
#define RING_0(_r) (((_r)->cs & 3) == 0)
+++ /dev/null
-#ifndef __ARCH_DESC_H
-#define __ARCH_DESC_H
-
-#define LDT_ENTRY_SIZE 16
-
-#define __DOUBLEFAULT_TSS_ENTRY FIRST_RESERVED_GDT_ENTRY
-
-#define __FIRST_PER_CPU_ENTRY (FIRST_RESERVED_GDT_ENTRY + 8)
-
-#define __CPU_DESC_INDEX(x,field) \
- ((x) * sizeof(struct per_cpu_gdt) + offsetof(struct per_cpu_gdt, field) + (__FIRST_PER_CPU_ENTRY*8))
-#define __LDT(n) (((n)<<1) + __FIRST_LDT_ENTRY)
-
-#define load_TR(cpu) asm volatile("ltr %w0"::"r" (__CPU_DESC_INDEX(cpu, tss)));
-#define __load_LDT(cpu) asm volatile("lldt %w0"::"r" (__CPU_DESC_INDEX(cpu, ldt)));
-#define clear_LDT(n) asm volatile("lldt %w0"::"r" (0))
-
-/*
- * Guest OS must provide its own code selectors, or use the one we provide. The
- * RPL must be 1, as we only create bounce frames to ring 1. Any LDT selector
- * value is okay. Note that checking only the RPL is insufficient: if the
- * selector is poked into an interrupt, trap or call gate then the RPL is
- * ignored when the gate is accessed.
- */
-#define VALID_SEL(_s) \
- (((((_s)>>3) < FIRST_RESERVED_GDT_ENTRY) || \
- (((_s)>>3) > LAST_RESERVED_GDT_ENTRY) || \
- ((_s)&4)) && \
- (((_s)&3) == 0))
-#define VALID_CODESEL(_s) ((_s) == FLAT_RING3_CS || VALID_SEL(_s))
-
-/* These are bitmasks for the first 32 bits of a descriptor table entry. */
-#define _SEGMENT_TYPE (15<< 8)
-#define _SEGMENT_S ( 1<<12) /* System descriptor (yes iff S==0) */
-#define _SEGMENT_DPL ( 3<<13) /* Descriptor Privilege Level */
-#define _SEGMENT_P ( 1<<15) /* Segment Present */
-#define _SEGMENT_G ( 1<<23) /* Granularity */
-
-#ifndef __ASSEMBLY__
-
-enum {
- GATE_INTERRUPT = 0xE,
- GATE_TRAP = 0xF,
- GATE_CALL = 0xC,
-};
-
-// 16byte gate
-struct gate_struct {
- u16 offset_low;
- u16 segment;
- unsigned ist : 3, zero0 : 5, type : 5, dpl : 2, p : 1;
- u16 offset_middle;
- u32 offset_high;
- u32 zero1;
-} __attribute__((packed));
-
-// 8 byte segment descriptor
-struct desc_struct {
- u16 limit0;
- u16 base0;
- unsigned base1 : 8, type : 4, s : 1, dpl : 2, p : 1;
- unsigned limit : 4, avl : 1, l : 1, d : 1, g : 1, base2 : 8;
-} __attribute__((packed));
-
-// LDT or TSS descriptor in the GDT. 16 bytes.
-struct ldttss_desc {
- u16 limit0;
- u16 base0;
- unsigned base1 : 8, type : 5, dpl : 2, p : 1;
- unsigned limit1 : 4, zero0 : 3, g : 1, base2 : 8;
- u32 base3;
- u32 zero1;
-} __attribute__((packed));
-
-// Union of above structures
-union desc_union {
- struct desc_struct seg;
- struct ldttss_desc ldttss;
- struct gate_struct gate;
-};
-
-struct per_cpu_gdt {
- struct ldttss_desc tss;
- struct ldttss_desc ldt;
-} __cacheline_aligned;
-
-
-struct Xgt_desc_struct {
- unsigned short size;
- unsigned long address;
-} __attribute__((packed));
-
-extern __u8 gdt_table[];
-extern __u8 gdt_end[];
-extern union desc_union *gdt;
-
-extern struct per_cpu_gdt gdt_cpu_table[];
-
-#define PTR_LOW(x) ((unsigned long)(x) & 0xFFFF)
-#define PTR_MIDDLE(x) (((unsigned long)(x) >> 16) & 0xFFFF)
-#define PTR_HIGH(x) ((unsigned long)(x) >> 32)
-
-enum {
- DESC_TSS = 0x9,
- DESC_LDT = 0x2,
-};
-
-extern struct gate_struct *idt;
-
-#define idt_descr (*(struct Xgt_desc_struct *)((char *)&idt - 2))
-#define gdt_descr (*(struct Xgt_desc_struct *)((char *)&gdt - 2))
-
-extern void set_intr_gate(unsigned int irq, void * addr);
-extern void set_tss_desc(unsigned int n, void *addr);
-
-#endif /* !__ASSEMBLY__ */
-
-#endif
#ifndef _X86_64_REGS_H
#define _X86_64_REGS_H
-#if defined(__ASSEMBLY__) || defined(__FRAME_OFFSETS)
-#define R15 0
-#define R14 8
-#define R13 16
-#define R12 24
-#define RBP 36
-#define RBX 40
-/* arguments: interrupts/hypercalls only save upto here*/
-#define R11 48
-#define R10 56
-#define R9 64
-#define R8 72
-#define RAX 80
-#define RCX 88
-#define RDX 96
-#define RSI 104
-#define RDI 112
-#define ORIG_RAX 120 /* = ERROR */
-/* end of arguments */
-/* cpu exception frame or undefined in case of fast hypercall. */
-#define RIP 128
-#define CS 136
-#define EFLAGS 144
-#define RSP 152
-#define SS 160
-#define ARGOFFSET R11
-#endif /* __ASSEMBLY__ */
-
-/* top of stack page */
-#define FRAME_SIZE 168
-
-#define PTRACE_SETOPTIONS 21
-
-/* options set using PTRACE_SETOPTIONS */
-#define PTRACE_O_TRACESYSGOOD 0x00000001
-
-/* Dummy values for ptrace */
-#define FS 1000
-#define GS 1008
-
-#ifndef __ASSEMBLY__
-
-struct xen_regs {
- unsigned long r15;
- unsigned long r14;
- unsigned long r13;
- unsigned long r12;
- unsigned long rbp;
- unsigned long rbx;
-/* arguments: non interrupts/hypercalls only save upto here*/
- unsigned long r11;
- unsigned long r10;
- unsigned long r9;
- unsigned long r8;
- unsigned long rax;
- unsigned long rcx;
- unsigned long rdx;
- unsigned long rsi;
- unsigned long rdi;
- unsigned long orig_rax;
-/* end of arguments */
-/* cpu exception frame or undefined */
- unsigned long rip;
- unsigned long cs;
- unsigned long eflags;
- unsigned long rsp;
- unsigned long ss;
-/* top of stack page */
-};
-
-#endif
-
-/* Arbitrarily choose the same ptrace numbers as used by the Sparc code. */
-#define PTRACE_GETREGS 12
-#define PTRACE_SETREGS 13
-#define PTRACE_GETFPREGS 14
-#define PTRACE_SETFPREGS 15
-#define PTRACE_GETFPXREGS 18
-#define PTRACE_SETFPXREGS 19
-
-#if !defined(__ASSEMBLY__)
-
-#define instruction_pointer(regs) ((regs)->rip)
-extern void show_regs(struct xen_regs *);
-
-enum {
- EF_CF = 0x00000001,
- EF_PF = 0x00000004,
- EF_AF = 0x00000010,
- EF_ZF = 0x00000040,
- EF_SF = 0x00000080,
- EF_TF = 0x00000100,
- EF_IE = 0x00000200,
- EF_DF = 0x00000400,
- EF_OF = 0x00000800,
- EF_IOPL = 0x00003000,
- EF_IOPL_RING0 = 0x00000000,
- EF_IOPL_RING1 = 0x00001000,
- EF_IOPL_RING2 = 0x00002000,
- EF_NT = 0x00004000, /* nested task */
- EF_RF = 0x00010000, /* resume */
- EF_VM = 0x00020000, /* virtual mode */
- EF_AC = 0x00040000, /* alignment */
- EF_VIF = 0x00080000, /* virtual interrupt */
- EF_VIP = 0x00100000, /* virtual interrupt pending */
- EF_ID = 0x00200000, /* id */
-};
-
-#endif
+#include <asm/types.h>
+
+struct xen_regs
+{
+ u64 r15;
+ u64 r14;
+ u64 r13;
+ u64 r12;
+ u64 rbp;
+ u64 rbx;
+ u64 r11;
+ u64 r10;
+ u64 r9;
+ u64 r8;
+ u64 rax;
+ u64 rcx;
+ u64 rdx;
+ u64 rsi;
+ u64 rdi;
+ u32 error_code;
+ u32 entry_vector;
+ u64 rip;
+ u64 cs;
+ u64 eflags;
+ u64 rsp;
+ u64 ss;
+} __attribute__ ((packed));
+
+#define VM86_MODE(_r) ((_r)->eflags & EF_VM)
+#define RING_0(_r) (((_r)->cs & 3) == 0)
+#define RING_1(_r) (((_r)->cs & 3) == 1)
+#define RING_2(_r) (((_r)->cs & 3) == 2)
+#define RING_3(_r) (((_r)->cs & 3) == 3)
#endif